In [9]:
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras import backend as K

input_img = Input(shape=(32, 32, 3))  # adapt this if using `channels_first` image data format

x1 = Conv2D(16, (9, 9), activation='relu', padding='same')(input_img)
x2 = MaxPooling2D((2, 2), padding='same')(x1)
x3 = Conv2D(8, (9, 9), activation='relu', padding='same')(x2)
x4 = MaxPooling2D((2, 2), padding='same')(x3)
x5 = Conv2D(8, (9, 9), activation='relu', padding='same')(x4)
encoded = MaxPooling2D((2, 2), padding='same')(x5)

# at this point the representation is (4, 4, 8) i.e. 128-dimensional

x6 = Conv2D(8, (9, 9), activation='relu', padding='same')(encoded)
x7 = UpSampling2D((2, 2))(x6)
x8 = Conv2D(8, (9, 9), activation='relu', padding='same')(x7)
x9 = UpSampling2D((2, 2))(x8)
x10 = Conv2D(16, (9, 9), activation='relu', padding='same')(x9)
x11 = UpSampling2D((2, 2))(x10)
decoded = Conv2D(3, (9, 9), activation='sigmoid', padding='same')(x11)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adagrad', loss='binary_crossentropy')

from keras.datasets import cifar10
import numpy as np

(x_train, _), (x_test, _) = cifar10.load_data()

x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 32, 32, 3))  # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 32, 32, 3))  # adapt this if using `channels_first` image data format

autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test, x_test))


Train on 50000 samples, validate on 10000 samples
Epoch 1/50
50000/50000 [==============================] - 367s - loss: 0.6593 - val_loss: 0.6364
Epoch 2/50
50000/50000 [==============================] - 375s - loss: 0.6214 - val_loss: 0.6235
Epoch 3/50
50000/50000 [==============================] - 372s - loss: 0.6123 - val_loss: 0.6093
Epoch 4/50
50000/50000 [==============================] - 378s - loss: 0.6085 - val_loss: 0.6099
Epoch 5/50
50000/50000 [==============================] - 380s - loss: 0.6057 - val_loss: 0.6058
Epoch 6/50
50000/50000 [==============================] - 379s - loss: 0.6039 - val_loss: 0.6042
Epoch 7/50
50000/50000 [==============================] - 367s - loss: 0.6024 - val_loss: 0.6047
Epoch 8/50
50000/50000 [==============================] - 360s - loss: 0.6013 - val_loss: 0.6022
Epoch 9/50
50000/50000 [==============================] - 355s - loss: 0.6005 - val_loss: 0.6017
Epoch 10/50
50000/50000 [==============================] - 353s - loss: 0.5996 - val_loss: 0.5997
Epoch 11/50
50000/50000 [==============================] - 355s - loss: 0.5987 - val_loss: 0.5986
Epoch 12/50
50000/50000 [==============================] - 354s - loss: 0.5979 - val_loss: 0.5996
Epoch 13/50
50000/50000 [==============================] - 354s - loss: 0.5970 - val_loss: 0.5976
Epoch 14/50
50000/50000 [==============================] - 354s - loss: 0.5958 - val_loss: 0.5970
Epoch 15/50
50000/50000 [==============================] - 354s - loss: 0.5948 - val_loss: 0.5943
Epoch 16/50
50000/50000 [==============================] - 353s - loss: 0.5939 - val_loss: 0.5953
Epoch 17/50
50000/50000 [==============================] - 357s - loss: 0.5931 - val_loss: 0.5927
Epoch 18/50
50000/50000 [==============================] - 434s - loss: 0.5924 - val_loss: 0.5944
Epoch 19/50
50000/50000 [==============================] - 438s - loss: 0.5918 - val_loss: 0.5916
Epoch 20/50
50000/50000 [==============================] - 438s - loss: 0.5912 - val_loss: 0.5914
Epoch 21/50
50000/50000 [==============================] - 436s - loss: 0.5908 - val_loss: 0.5910
Epoch 22/50
50000/50000 [==============================] - 434s - loss: 0.5903 - val_loss: 0.5907
Epoch 23/50
50000/50000 [==============================] - 437s - loss: 0.5899 - val_loss: 0.5898
Epoch 24/50
50000/50000 [==============================] - 435s - loss: 0.5895 - val_loss: 0.5900
Epoch 25/50
50000/50000 [==============================] - 436s - loss: 0.5892 - val_loss: 0.5898
Epoch 26/50
50000/50000 [==============================] - 439s - loss: 0.5889 - val_loss: 0.5895
Epoch 27/50
50000/50000 [==============================] - 438s - loss: 0.5885 - val_loss: 0.5896
Epoch 28/50
50000/50000 [==============================] - 436s - loss: 0.5883 - val_loss: 0.5887
Epoch 29/50
50000/50000 [==============================] - 438s - loss: 0.5880 - val_loss: 0.5881
Epoch 30/50
50000/50000 [==============================] - 435s - loss: 0.5877 - val_loss: 0.5893
Epoch 31/50
50000/50000 [==============================] - 437s - loss: 0.5875 - val_loss: 0.5899
Epoch 32/50
50000/50000 [==============================] - 437s - loss: 0.5872 - val_loss: 0.5896
Epoch 33/50
50000/50000 [==============================] - 436s - loss: 0.5869 - val_loss: 0.5876
Epoch 34/50
50000/50000 [==============================] - 436s - loss: 0.5867 - val_loss: 0.5877
Epoch 35/50
50000/50000 [==============================] - 440s - loss: 0.5865 - val_loss: 0.5869
Epoch 36/50
50000/50000 [==============================] - 440s - loss: 0.5863 - val_loss: 0.5872
Epoch 37/50
50000/50000 [==============================] - 436s - loss: 0.5860 - val_loss: 0.5867
Epoch 38/50
50000/50000 [==============================] - 434s - loss: 0.5859 - val_loss: 0.5873
Epoch 39/50
50000/50000 [==============================] - 440s - loss: 0.5857 - val_loss: 0.5860
Epoch 40/50
50000/50000 [==============================] - 437s - loss: 0.5855 - val_loss: 0.5862
Epoch 41/50
50000/50000 [==============================] - 438s - loss: 0.5853 - val_loss: 0.5861
Epoch 42/50
50000/50000 [==============================] - 438s - loss: 0.5852 - val_loss: 0.5862
Epoch 43/50
50000/50000 [==============================] - 436s - loss: 0.5850 - val_loss: 0.5857
Epoch 44/50
50000/50000 [==============================] - 439s - loss: 0.5849 - val_loss: 0.5859
Epoch 45/50
50000/50000 [==============================] - 437s - loss: 0.5847 - val_loss: 0.5852
Epoch 46/50
50000/50000 [==============================] - 435s - loss: 0.5846 - val_loss: 0.5856
Epoch 47/50
50000/50000 [==============================] - 435s - loss: 0.5845 - val_loss: 0.5847
Epoch 48/50
50000/50000 [==============================] - 433s - loss: 0.5843 - val_loss: 0.5855
Epoch 49/50
50000/50000 [==============================] - 436s - loss: 0.5842 - val_loss: 0.5845
Epoch 50/50
50000/50000 [==============================] - 437s - loss: 0.5841 - val_loss: 0.5845
Out[9]:
<keras.callbacks.History at 0x13583b00>
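
A quick sanity check on the (4, 4, 8) bottleneck noted above (a sketch, not part of the original run): print the layer-by-layer output shapes. With padding='same' and three 2x2 poolings the spatial size goes 32 -> 16 -> 8 -> 4, so the code is 4 x 4 x 8 = 128 values per image.

autoencoder.summary()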

In [4]:
from keras.models import load_model

#autoencoder.save('cifar10_autoencoders_9.h5')  # creates an HDF5 file 'cifar10_autoencoders_9.h5'
#del autoencoder  # deletes the existing model

# load_model returns a compiled model,
# identical to the one that was saved
autoencoder = load_model('cifar10_autoencoders_9.h5')


Using TensorFlow backend.

In [10]:
import matplotlib.pyplot as plt

decoded_imgs = autoencoder.predict(x_test)

n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i])
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + n + 1)
    plt.imshow(decoded_imgs[i])
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
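
The side-by-side plots above are qualitative; a rough quantitative check (a sketch, reusing the decoded_imgs batch just computed) is the mean squared reconstruction error over the test set:

mse = np.mean((x_test - decoded_imgs) ** 2)
print('mean squared reconstruction error: %.4f' % mse)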


Weights of the first convolutional layer


In [11]:
import matplotlib.pyplot as plt
import numpy as np

n = 8

# plot the first n 9x9 kernels of the first conv layer (input channel 0)
for i in range(n):
    fig = plt.figure(figsize=(1, 1))
    conv_1 = autoencoder.layers[1].get_weights()[0][:, :, 0, i]
    ax = fig.add_subplot(111)
    plt.imshow(conv_1, cmap='gray')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    plt.show()
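
The kernel tensor of a Conv2D layer has shape (rows, cols, in_channels, filters), here (9, 9, 3, 16), so the loop above only shows the slice for input channel 0. A sketch that tiles all 16 filters (still input channel 0) into a single figure:

weights = autoencoder.layers[1].get_weights()[0]  # shape (9, 9, 3, 16)
fig, axes = plt.subplots(2, 8, figsize=(8, 2))
for idx, ax in enumerate(axes.flat):
    ax.imshow(weights[:, :, 0, idx], cmap='gray')
    ax.axis('off')
plt.show()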



In [13]:
from keras import backend as K

# K.learning_phase() is a flag that indicates whether the network is in training or
# prediction phase. It allows layers (e.g. Dropout) to be applied only during training.
inputs = [K.learning_phase()] + autoencoder.inputs

_layer1_f = K.function(inputs, [x2])
def convout1_f(X):
    # The [0] is to disable the training phase flag
    return _layer1_f([0] + [X])

_lay_f = K.function(inputs, [x1])
def convout12_f(X):
    # The [0] is to disable the training phase flag
    return _lay_f([0] + [X])

_layer2_f = K.function(inputs, [x4])
def convout2_f(X):
    # The [0] is to disable the training phase flag
    return _layer2_f([0] + [X])

_layer3_f = K.function(inputs, [encoded])
def convout3_f(X):
    # The [0] is to disable the training phase flag
    return _layer3_f([0] + [X])

_up_layer1_f = K.function(inputs, [x6])
def convout4_f(X):
    # The [0] is to disable the training phase flag
    return _up_layer1_f([0] + [X])

_up_layer2_f = K.function(inputs, [x8])
def convout5_f(X):
    # The [0] is to disable the training phase flag
    return _up_layer2_f([0] + [X])

_up_layer3_f = K.function(inputs, [x10])
def convout6_f(X):
    # The [0] is to disable the training phase flag
    return _up_layer3_f([0] + [X])

_up_layer4_f = K.function(inputs, [decoded])
def convout7_f(X):
    # The [0] is to disable the training phase flag
    return _up_layer4_f([0] + [X])
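
An equivalent and arguably simpler way to read intermediate activations (a sketch; it builds a second Model that shares the trained layers instead of calling K.function) is:

from keras.models import Model
activation_model = Model(input_img, [x1, x2, x4, encoded, x6, x8, x10, decoded])
acts = activation_model.predict(x_test[:1])  # list of 8 arrays, one per tapped tensor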

In [21]:
#Plotting conv_1

for z in range(4):
    x = x_test[z:z+1]
    check = np.squeeze(np.squeeze(np.array(convout1_f(x)), 0), 0)

    # show the input image
    temp = x[0, :, :, :]
    fig, axes = plt.subplots(1, 1, figsize=(2, 2))
    plt.imshow(temp)
    plt.show()

    # tile the 16 feature maps of the first pooling layer into a 4x4 grid
    k = 0
    while k < check.shape[2]:
        fig, axes = plt.subplots(4, 4, figsize=(5, 5))
        for i in range(4):
            for j in range(4):
                axes[i, j].imshow(check[:, :, k], cmap='gray')
                k += 1

    plt.show()
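
The same grid-plotting loop recurs in every cell below; it could be factored into a small helper like this sketch (plot_feature_maps is a hypothetical name, and the cells below keep their original inline loops):

def plot_feature_maps(check, rows, cols, cmap='gray'):
    # tile the channels of an (H, W, C) activation volume into rows x cols grids
    k = 0
    while k < check.shape[2]:
        fig, axes = plt.subplots(rows, cols, figsize=(5, 5))
        for ax in axes.flat:
            if k < check.shape[2]:
                ax.imshow(check[:, :, k], cmap=cmap)
                k += 1
            ax.axis('off')
        plt.show()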



In [22]:
#Plotting conv 2

for z in range(4):
    x = x_test[z:z+1]
    check = np.squeeze(np.squeeze(np.array(convout2_f(x)), 0), 0)

    # show the input image
    temp = x[0, :, :, :]
    fig, axes = plt.subplots(1, 1, figsize=(3, 3))
    plt.imshow(temp)
    plt.show()

    # tile the 8 feature maps of the second pooling layer into a 2x4 grid
    k = 0
    while k < check.shape[2]:
        fig, axes = plt.subplots(2, 4, figsize=(5, 5))
        for i in range(2):
            for j in range(4):
                axes[i, j].imshow(check[:, :, k])
                k += 1

    plt.show()



In [24]:
#Plotting conv 3

for z in range(4):
    x = x_test[z:z+1]
    check = np.squeeze(np.squeeze(np.array(convout3_f(x)), 0), 0)

    # show the input image
    temp = x[0, :, :, :]
    fig, axes = plt.subplots(1, 1, figsize=(3, 3))
    plt.imshow(temp)
    plt.show()

    # tile the 8 maps of the encoded (4, 4, 8) representation into a 2x4 grid
    k = 0
    while k < check.shape[2]:
        fig, axes = plt.subplots(2, 4, figsize=(5, 5))
        for i in range(2):
            for j in range(4):
                axes[i, j].imshow(check[:, :, k])
                k += 1

    plt.show()



In [26]:
#Plotting conv_4

for z in range(4):
    x = x_test[z:z+1]
    check = np.squeeze(np.squeeze(np.array(convout4_f(x)), 0), 0)

    # show the input image
    temp = x[0, :, :, :]
    fig, axes = plt.subplots(1, 1, figsize=(3, 3))
    plt.imshow(temp)
    plt.show()

    # tile the 8 feature maps of the first decoder conv into a 2x4 grid
    k = 0
    while k < check.shape[2]:
        fig, axes = plt.subplots(2, 4, figsize=(5, 5))
        for i in range(2):
            for j in range(4):
                axes[i, j].imshow(check[:, :, k])
                k += 1

    plt.show()




In [35]:
#Plotting conv_5

for z in range(4):
    x = x_test[z:z+1]
    check = np.squeeze(np.squeeze(np.array(convout5_f(x)), 0), 0)

    # show the input image
    temp = x[0, :, :, :]
    fig, axes = plt.subplots(1, 1, figsize=(3, 3))
    plt.imshow(temp)
    plt.show()

    # tile the 8 feature maps of the second decoder conv into a 2x4 grid
    k = 0
    while k < check.shape[2]:
        fig, axes = plt.subplots(2, 4, figsize=(5, 5))
        for i in range(2):
            for j in range(4):
                axes[i, j].imshow(check[:, :, k])
                k += 1

    plt.show()



In [36]:
#Plotting conv_6

for z in range(4):
    x = x_test[z:z+1]
    check = np.squeeze(np.squeeze(np.array(convout6_f(x)), 0), 0)

    # show the input image
    temp = x[0, :, :, :]
    fig, axes = plt.subplots(1, 1, figsize=(3, 3))
    plt.imshow(temp)
    plt.show()

    # tile the 16 feature maps of the third decoder conv into a 4x4 grid
    k = 0
    while k < check.shape[2]:
        fig, axes = plt.subplots(4, 4, figsize=(5, 5))
        for i in range(4):
            for j in range(4):
                axes[i, j].imshow(check[:, :, k])
                k += 1

    plt.show()



In [39]:
#Final decoded images

decoded_imgs = autoencoder.predict(x_test)

n = 4
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i])
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + n + 1)
    plt.imshow(decoded_imgs[i])
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
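
Since the functional graph from In [9] is still in scope, the trained encoder can also be pulled out as a standalone model (a sketch; encoder is a new name, not defined elsewhere in this notebook) to produce the 128-value codes directly:

encoder = Model(input_img, encoded)
codes = encoder.predict(x_test)
print(codes.shape)  # (10000, 4, 4, 8)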



In [ ]: